/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_crit("Unable to find CPU node for %pOF\n", cpu_node);

	of_node_put(cpu_node);
	return cpu;
}
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	/* Walk the threadN children of this core, if any. */
	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n", core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling;

	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask))
			core_mask = &cpu_topology[cpu].llc_siblings;
	}

	return core_mask;
}
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}
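/*
 * Worked example (illustrative values, not from this file): on a non-MT
 * part, the MPIDR_EL1 affinity fields map below as core_id = Aff0 and
 * package_id = Aff1 | Aff2 << 8 | Aff3 << 16. So an MPIDR with Aff0 = 2,
 * Aff1 = 1 and Aff2 = Aff3 = 0 is recorded as core 2 of package 1, with
 * thread_id left at -1.
 */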
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->package_id = -1;

		cpu_topo->llc_id = -1;
		cpumask_clear(&cpu_topo->llc_siblings);
		cpumask_set_cpu(cpu, &cpu_topo->llc_siblings);

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}
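/*
 * Descriptive note (added summary, not part of the original comment): on
 * ACPI systems the equivalent hierarchy comes from the PPTT table;
 * find_acpi_cpu_topology() and find_acpi_cpu_topology_package() below
 * return identifiers for the requested level of that tree, and
 * acpi_find_last_cache_level() reports the deepest cache level so the LLC
 * can be tied into cpu_topology.
 */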
#ifdef CONFIG_ACPI
/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
static int __init parse_acpi_topology(void)
{
	bool is_threaded;
	int cpu, topology_id;

	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (is_threaded) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}

#else
static inline int __init parse_acpi_topology(void)
{
	return -EINVAL;
}
#endif
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (!acpi_disabled && parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}