// SPDX-License-Identifier: GPL-2.0
/* Manage affinity to optimize IPIs inside the kernel perf API. */
#define _GNU_SOURCE 1
#include <sched.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/zalloc.h>
#include "perf.h"
#include "cpumap.h"
#include "affinity.h"
12 static int get_cpu_set_size(void)
14 int sz
= cpu__max_cpu().cpu
+ 8 - 1;
16 * sched_getaffinity doesn't like masks smaller than the kernel.
17 * Hopefully that's big enough.
24 int affinity__setup(struct affinity
*a
)
26 int cpu_set_size
= get_cpu_set_size();
28 a
->orig_cpus
= bitmap_zalloc(cpu_set_size
* 8);
31 sched_getaffinity(0, cpu_set_size
, (cpu_set_t
*)a
->orig_cpus
);
32 a
->sched_cpus
= bitmap_zalloc(cpu_set_size
* 8);
37 bitmap_zero((unsigned long *)a
->sched_cpus
, cpu_set_size
);
43 * perf_event_open does an IPI internally to the target CPU.
44 * It is more efficient to change perf's affinity to the target
45 * CPU and then set up all events on that CPU, so we amortize
48 void affinity__set(struct affinity
*a
, int cpu
)
50 int cpu_set_size
= get_cpu_set_size();
55 * - restrict out of bound access to sched_cpus
57 if (cpu
== -1 || ((cpu
>= (cpu_set_size
* 8))))
61 __set_bit(cpu
, a
->sched_cpus
);
63 * We ignore errors because affinity is just an optimization.
64 * This could happen for example with isolated CPUs or cpusets.
65 * In this case the IPIs inside the kernel's perf API still work.
67 sched_setaffinity(0, cpu_set_size
, (cpu_set_t
*)a
->sched_cpus
);
68 __clear_bit(cpu
, a
->sched_cpus
);
71 static void __affinity__cleanup(struct affinity
*a
)
73 int cpu_set_size
= get_cpu_set_size();
76 sched_setaffinity(0, cpu_set_size
, (cpu_set_t
*)a
->orig_cpus
);
77 zfree(&a
->sched_cpus
);
81 void affinity__cleanup(struct affinity
*a
)
84 __affinity__cleanup(a
);