/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_crit("Unable to find CPU node for %pOF\n", cpu_node);

	of_node_put(cpu_node);
	return cpu;
}
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%pOF: Can't get CPU for thread\n",
				       t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}
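/*
 * For illustration only (not part of this file): a minimal cpu-map node
 * of the kind walked by parse_cluster()/parse_core() above might look
 * like the following devicetree fragment. The CPU0..CPU3 labels are
 * hypothetical placeholders for real cpu node phandles:
 *
 *	cpus {
 *		cpu-map {
 *			cluster0 {
 *				core0 {
 *					cpu = <&CPU0>;
 *				};
 *				core1 {
 *					cpu = <&CPU1>;
 *				};
 *			};
 *			cluster1 {
 *				core0 {
 *					thread0 {
 *						cpu = <&CPU2>;
 *					};
 *					thread1 {
 *						cpu = <&CPU3>;
 *					};
 *				};
 *			};
 *		};
 *	};
 *
 * Each leaf cluster bumps package_id by one; cores within it receive
 * consecutive core_ids, and thread nodes (if any) supply thread_ids.
 */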
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smallest of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}
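/*
 * Example of the narrowing above, on a hypothetical machine: if the
 * four CPUs of a package all fall within one NUMA node but only CPUs
 * 0-1 share the LLC, the package siblings (0-3) are a subset of the
 * node mask and get picked first, then the LLC siblings (0-1) are a
 * subset of those and win. The resulting mask typically feeds the MC
 * scheduling domain level.
 */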
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system: multiple threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system: single thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}
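/*
 * Worked example for the MPIDR decoding in store_cpu_topology() above,
 * using a hypothetical register value: MPIDR_AFFINITY_LEVEL() extracts
 * the 8-bit Aff0/Aff1/Aff2/Aff3 fields. On a non-MT system with mpidr
 * 0x80000101, Aff0 = 1 and Aff1 = 1, so the CPU decodes to
 * thread_id = -1, core_id = 1, package_id = 1. With the MT bit set the
 * fields shift up one level: Aff0 becomes the thread, Aff1 the core.
 */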
static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}
void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}
#ifdef CONFIG_ACPI
/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
static int __init parse_acpi_topology(void)
{
	bool is_threaded;
	int cpu, topology_id;

	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (is_threaded) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#else
static inline int __init parse_acpi_topology(void)
{
	return -EINVAL;
}
#endif
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (!acpi_disabled && parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}