// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

__weak bool arch_freq_counters_available(struct cpumask *cpus)
{
	return false;
}
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;

void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
			 unsigned long max_freq)
{
	unsigned long scale;
	int i;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (arch_freq_counters_available(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;
}
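
/*
 * Illustrative example (not part of the original source): with
 * cur_freq = 1200000 (kHz) and max_freq = 2400000 (kHz), the loop above
 * stores scale = (1200000 << SCHED_CAPACITY_SHIFT) / 2400000 = 512 for
 * every CPU in @cpus, i.e. half of SCHED_CAPACITY_SCALE, so the scheduler
 * sees those CPUs as currently running at half of their maximum frequency.
 */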

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
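
/*
 * Usage sketch (illustrative, not from the original file): once the
 * cpu_capacity attribute has been created for each possible CPU above,
 * userspace can read the normalized capacity of a CPU, e.g.:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cpu_capacity
 *	1024
 *
 * The reported values depend on the platform's capacity-dmips-mhz
 * properties and maximum frequencies.
 */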

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}
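
/*
 * Worked example (illustrative, not from the original source): assume two
 * CPU types with capacity-dmips-mhz of 1024 (big) and 512 (little), and
 * freq_factor of 2000 (2 GHz) and 1500 (1.5 GHz) respectively.  Then
 * capacity_scale = max(1024 * 2000, 512 * 1500) = 2048000, and the
 * normalized capacities become (2048000 << 10) / 2048000 = 1024 for the
 * big CPUs and (768000 << 10) / 2048000 = 384 for the little CPUs.
 */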

bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot cpu capacities.
		 * For non-clk CPU DVFS mechanism, there's no way to get the
		 * frequency value now, assuming they are running at the same
		 * frequency (by keeping the initial freq_factor value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}
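
/*
 * Example devicetree fragment (illustrative; the cpu-capacity binding
 * documentation is the authoritative reference for this property):
 *
 *	cpu0: cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0>;
 *		capacity-dmips-mhz = <578>;
 *	};
 */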

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering the cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 *     there is no possible logical CPU in the kernel to match. This happens
 *     when CONFIG_NR_CPUS is configured to be smaller than the number of
 *     CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}
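
/*
 * Example cpu-map node (illustrative; the devicetree cpu topology binding
 * is the authoritative reference).  parse_cluster() above walks children
 * named clusterN, and parse_core() walks coreN and threadN children:
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&cpu2>; };
 *			core1 { cpu = <&cpu3>; };
 *		};
 *	};
 */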

static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not numa in package, lets use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}
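
/*
 * Illustrative note (not from the original source): on a single-package,
 * single NUMA node system where CPUs 0-3 and CPUs 4-7 each share a last
 * level cache, cpu_coregroup_mask(1) starts from the NUMA mask (0-7),
 * keeps the package siblings (0-7), and finally narrows to the LLC
 * siblings (0-3), which is the span the scheduler ends up using for this
 * CPU's coregroup level.
 */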

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
#endif