// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include <linux/ctype.h>
#include <linux/zalloc.h>

static int max_cpu_num;
static int max_present_cpu_num;
static int max_node_num;
static int *cpunode_map;

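/*
 * The two constructors below rebuild a perf_cpu_map from the data
 * carried by a PERF_RECORD_CPU_MAP event: either an explicit list of
 * u16 cpu entries or a kernel-style bitmap, as selected by the type
 * check in cpu_map__new_data() further down.
 */
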
static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(cpus->nr);
	if (map) {
		unsigned i;

		for (i = 0; i < cpus->nr; i++) {
			/*
			 * Special treatment for -1, which is not a real cpu
			 * number: we need to use (int) -1 to initialize
			 * map[i], otherwise the u16 value would become 65535.
			 */
			if (cpus->cpu[i] == (u16) -1)
				map->map[i] = -1;
			else
				map->map[i] = (int) cpus->cpu[i];
		}
	}

	return map;
}

static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map *mask)
{
	struct perf_cpu_map *map;
	int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

	nr = bitmap_weight(mask->mask, nbits);

	map = perf_cpu_map__empty_new(nr);
	if (map) {
		int cpu, i = 0;

		for_each_set_bit(cpu, mask->mask, nbits)
			map->map[i++] = cpu;
	}
	return map;
}

struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data)
{
	if (data->type == PERF_CPU_MAP__CPUS)
		return cpu_map__from_entries((struct cpu_map_entries *)data->data);
	else
		return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data);
}

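/*
 * Illustrative example: for an event whose mask encoding has bits 0-3
 * set, cpu_map__new_data() yields a map holding cpus 0,1,2,3, i.e. the
 * same contents a caller would get from perf_cpu_map__new("0-3").
 */
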
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = -1;

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = cpu_map__empty_aggr_cpu_id();

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

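/*
 * Both allocators above place the map payload in the same allocation as
 * the header via a trailing flexible array (hence the sizeof(*cpus) +
 * n * element-size pattern) and hand the caller the initial reference.
 */
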
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

int cpu_map__get_socket_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
	return ret ?: value;
}

struct aggr_cpu_id cpu_map__get_socket(struct perf_cpu_map *map, int idx,
				       void *data __maybe_unused)
{
	int cpu;
	struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();

	if (idx < 0 || idx >= map->nr)
		return id;

	cpu = map->map[idx];

	id.socket = cpu_map__get_socket_id(cpu);
	return id;
}

static int cmp_aggr_cpu_id(const void *a_pointer, const void *b_pointer)
{
	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;

	if (a->node != b->node)
		return a->node - b->node;
	else if (a->socket != b->socket)
		return a->socket - b->socket;
	else if (a->die != b->die)
		return a->die - b->die;
	else if (a->core != b->core)
		return a->core - b->core;
	else
		return a->thread - b->thread;
}

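/*
 * The comparison orders IDs hierarchically: node first, then socket,
 * die, core and finally thread, so the qsort() in cpu_map__build_map()
 * leaves aggregated IDs grouped from the outermost topology level
 * inwards.
 */
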
int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res,
		       struct aggr_cpu_id (*f)(struct perf_cpu_map *map, int cpu, void *data),
		       void *data)
{
	int nr = cpus->nr;
	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(nr);
	int cpu, s2;
	struct aggr_cpu_id s1;

	if (!c)
		return -1;

	/* Reset size as it may only be partially filled */
	c->nr = 0;

	for (cpu = 0; cpu < nr; cpu++) {
		s1 = f(cpus, cpu, data);
		for (s2 = 0; s2 < c->nr; s2++) {
			if (cpu_map__compare_aggr_cpu_id(s1, c->map[s2]))
				break;
		}
		if (s2 == c->nr) {
			c->map[c->nr] = s1;
			c->nr++;
		}
	}
	/* ensure we process ids in increasing order */
	qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), cmp_aggr_cpu_id);

	*res = c;
	return 0;
}

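/*
 * The duplicate scan above is quadratic in the number of cpus, which is
 * acceptable for the map sizes perf works with; the final qsort()
 * guarantees callers always see the unique IDs in increasing order.
 */
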
int cpu_map__get_die_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "die_id", &value);
	return ret ?: value;
}

struct aggr_cpu_id cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data)
{
	int cpu, die;
	struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();

	if (idx < 0 || idx >= map->nr)
		return id;

	cpu = map->map[idx];

	die = cpu_map__get_die_id(cpu);
	/* There is no die_id on legacy systems. */
	if (die == -1)
		die = 0;

	/*
	 * die_id is relative to the socket, so start with the socket ID
	 * and then add the die to make a unique ID.
	 */
	id = cpu_map__get_socket(map, idx, data);
	if (cpu_map__aggr_cpu_id_is_empty(id))
		return id;

	id.die = die;
	return id;
}

int cpu_map__get_core_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
	return ret ?: value;
}

int cpu_map__get_node_id(int cpu)
{
	return cpu__get_node(cpu);
}

struct aggr_cpu_id cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data)
{
	int cpu;
	struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();

	if (idx < 0 || idx >= map->nr)
		return id;

	cpu = map->map[idx];

	cpu = cpu_map__get_core_id(cpu);

	/* cpu_map__get_die() returns a struct with socket and die set. */
	id = cpu_map__get_die(map, idx, data);
	if (cpu_map__aggr_cpu_id_is_empty(id))
		return id;

	/*
	 * core_id is relative to the socket and die, but we need a global
	 * ID, so combine the result from cpu_map__get_die() with the core id.
	 */
	id.core = cpu;
	return id;
}

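/*
 * The getters above compose: cpu_map__get_core() builds on
 * cpu_map__get_die(), which builds on cpu_map__get_socket(), so a core
 * ID is only meaningful together with its die and socket fields.
 */
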
struct aggr_cpu_id cpu_map__get_node(struct perf_cpu_map *map, int idx, void *data __maybe_unused)
{
	struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();

	if (idx < 0 || idx >= map->nr)
		return id;

	id.node = cpu_map__get_node_id(map->map[idx]);
	return id;
}

int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}

int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **diep)
{
	return cpu_map__build_map(cpus, diep, cpu_map__get_die, NULL);
}

int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **corep)
{
	return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}

int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **numap)
{
	return cpu_map__build_map(cpus, numap, cpu_map__get_node, NULL);
}

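/*
 * Illustrative usage sketch for the wrappers above, assuming a valid
 * "cpus" map; sockets->map then holds one sorted entry per distinct
 * physical_package_id:
 *
 *	struct cpu_aggr_map *sockets;
 *
 *	if (cpu_map__build_socket_map(cpus, &sockets) < 0)
 *		return -1;
 */
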
/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find highest node num */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert from 0-based to 1-based */
	(*max)++;

out:
	free(buf);
	return err;
}

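/*
 * Illustrative walk-through: for a sysfs list file containing "0-7",
 * the backwards scan stops just past the '-', sscanf() reads 7, and
 * *max becomes 8 after the 0-based to 1-based conversion.
 */
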
/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num = 4096;
	max_present_cpu_num = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}

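/*
 * Illustrative example: on a machine whose
 * /sys/devices/system/cpu/possible reads "0-1", max_cpu_num becomes 2
 * instead of the 4096 fallback used when sysfs is unavailable.
 */
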
/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

int cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num))
		set_max_cpu_num();

	return max_cpu_num;
}

int cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num))
		set_max_cpu_num();

	return max_present_cpu_num;
}

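/*
 * The three accessors above initialize the cached maximums lazily on
 * first use and, like cpu_map__online() below, are not thread safe.
 */
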
int cpu__get_node(int cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu];
}

static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;

	return 0;
}

int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}

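/*
 * Illustrative example: on a two-node system where
 * /sys/devices/system/node/node1 contains symlinks cpu4 through cpu7,
 * the walk above sets cpunode_map[4..7] = 1, so cpu__get_node(5)
 * returns 1.
 */
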
bool cpu_map__has(struct perf_cpu_map *cpus, int cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

int cpu_map__cpu(struct perf_cpu_map *cpus, int idx)
{
	return cpus->map[idx];
}

size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < map->nr + 1; i++) {
		bool last = i == map->nr;

		cpu = last ? INT_MAX : map->map[i];

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[i]);
			}
		} else if (((i - start) != (cpu - map->map[start])) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[start]);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						map->map[start], map->map[end]);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}

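/*
 * Illustrative example: a map holding cpus {0, 1, 2, 3, 7} prints as
 * "0-3,7": consecutive runs collapse to "first-last" ranges and lone
 * cpus are emitted as single numbers.
 */
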
static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu;
	char *ptr = buf;
	unsigned char *bitmap;
	int last_cpu = cpu_map__cpu(map, map->nr - 1);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < map->nr; i++) {
		cpu = cpu_map__cpu(map, i);
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}

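/*
 * Illustrative example: cpus {0, 1, 2, 3} print as the hex mask "f" and
 * cpus {0, 4} as "11"; nibbles are emitted from the highest cpu down,
 * with a ',' separator inserted every 32 cpus.
 */
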
const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static const struct perf_cpu_map *online = NULL;

	if (!online)
		online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */

	return online;
}

bool cpu_map__compare_aggr_cpu_id(struct aggr_cpu_id a, struct aggr_cpu_id b)
{
	return a.thread == b.thread &&
		a.node == b.node &&
		a.socket == b.socket &&
		a.die == b.die &&
		a.core == b.core;
}

bool cpu_map__aggr_cpu_id_is_empty(struct aggr_cpu_id a)
{
	return a.thread == -1 &&
		a.node == -1 &&
		a.socket == -1 &&
		a.die == -1 &&
		a.core == -1;
}

struct aggr_cpu_id cpu_map__empty_aggr_cpu_id(void)
{
	struct aggr_cpu_id ret = {
		.thread = -1,
		.node = -1,
		.socket = -1,
		.die = -1,
		.core = -1
	};
	return ret;
}